}
}
-int hvm_clear_pending_io_event(struct vcpu *v)
+/*
+ * On exit from hvm_wait_io, we're guaranteed not to be waiting on
+ * I/O response from the device model.
+ */
+void hvm_wait_io(void)
{
- struct domain *d = v->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
int port = iopacket_port(d);
- /* evtchn_pending_sel bit is shared by other event channels. */
- if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
+ for ( ; ; )
+ {
+ /* Clear master flag, selector flag, event flag each in turn. */
+ v->vcpu_info->evtchn_upcall_pending = 0;
+ smp_mb__before_clear_bit();
clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+ smp_mb__after_clear_bit();
+ if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
+ hvm_io_assist(v);
- /* Note: HVM domains may need upcalls as well. */
- if (!v->vcpu_info->evtchn_pending_sel)
- clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+ /* Need to wait for I/O responses? */
+ if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
+ break;
- /* Clear the pending bit for port. */
- return test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]);
-}
+ do_sched_op(SCHEDOP_block, 0);
+ }
-/*
- * Because we've cleared the pending events first, we need to guarantee that
- * all events to be handled by xen for HVM domains are taken care of here.
- *
- * interrupts are guaranteed to be checked before resuming guest.
- * HVM upcalls have been already arranged for if necessary.
- */
-void hvm_check_events(struct vcpu *v)
-{
/*
- * Clear the event *before* checking for work. This should
- * avoid the set-and-check races
+ * Re-set the selector and master flags in case any other notifications
+ * are pending.
*/
- if (hvm_clear_pending_io_event(current))
- hvm_io_assist(v);
-}
-
-/*
- * On exit from hvm_wait_io, we're guaranteed to have a I/O response
- * from the device model.
- */
-void hvm_wait_io(void)
-{
- int port = iopacket_port(current->domain);
-
- do {
- if (!test_bit(port, &current->domain->shared_info->evtchn_pending[0]))
- do_sched_op(SCHEDOP_block, 0);
-
- hvm_check_events(current);
- if (!test_bit(ARCH_HVM_IO_WAIT, &current->arch.hvm_vcpu.ioflags))
- break;
- /*
- * Events other than IOPACKET_PORT might have woken us up.
- * In that case, safely go back to sleep.
- */
- clear_bit(port/BITS_PER_LONG, &current->vcpu_info->evtchn_pending_sel);
- clear_bit(0, &current->vcpu_info->evtchn_upcall_pending);
- } while(1);
+ if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
+ set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+ if ( v->vcpu_info->evtchn_pending_sel )
+ v->vcpu_info->evtchn_upcall_pending = 1;
}
/*
vmx_stts();
- if (event_pending(v)) {
- hvm_check_events(v);
+ if ( event_pending(v) )
+ hvm_wait_io();
- if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags))
- hvm_wait_io();
- }
/* pick up the elapsed PIT ticks and re-enable pit_timer */
- if ( vpit->first_injected ) {
+ if ( vpit->first_injected )
pickup_deactive_ticks(vpit);
- }
- vmx_set_tsc_shift(v,vpit);
+ vmx_set_tsc_shift(v, vpit);
/* We can't resume the guest if we're waiting on I/O */
ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));